Allow preemption of long-running hypercalls for softirq processing.
return 0;
}
+/*
+ * Arrange for the current hypercall to be transparently restarted by the
+ * guest: reload the hypercall number into %eax, rewind EIP over the
+ * 2-byte 'int 0x82' trap instruction, and write the (possibly updated)
+ * argument values back into the guest argument registers, starting at
+ * %ebx.  On return to guest context the hypercall re-executes with the
+ * new arguments, effectively resuming where it left off.
+ *
+ * op      - hypercall number to re-issue (__HYPERVISOR_*).
+ * nr_args - number of unsigned-long varargs that follow.
+ */
+void hypercall_create_continuation(unsigned int op, unsigned int nr_args, ...)
+{
+ execution_context_t *ec = get_execution_context();
+ /* NOTE(review): assumes ebx,ecx,edx,... are contiguous unsigned longs
+ * in execution_context_t so they can be walked with a pointer — confirm
+ * against the struct definition before changing that layout. */
+ unsigned long *preg = &ec->ebx;
+ unsigned int i;
+ va_list args;
+
+ ec->eax = op;
+ ec->eip -= 2; /* re-execute 'int 0x82' */
+
+ va_start(args, nr_args);
+ for ( i = 0; i < nr_args; i++ )
+ *preg++ = va_arg(args, unsigned long);
+ va_end(args);
+}
+
#endif
for ( i = 0; i < count; i++ )
{
+ hypercall_may_preempt(
+ __HYPERVISOR_mmu_update, 3, ureqs, count-i, success_count);
+
if ( unlikely(__copy_from_user(&req, ureqs, sizeof(req)) != 0) )
{
MEM_LOG("Bad __copy_from_user");
}
if ( unlikely(success_count != NULL) )
- put_user(count, success_count);
+ put_user(i, success_count);
return rc;
}
for ( ; ; )
{
+ hypercall_may_preempt(__HYPERVISOR_set_trap_table, 1, traps);
+
if ( copy_from_user(&cur, traps, sizeof(cur)) ) return -EFAULT;
if ( cur.address == 0 ) break;
jnc bad_multicall_address
multicall_loop:
pushl %ecx
+ movl 4(%esp),%ecx # %ecx = struct domain
+ movl DOMAIN_processor(%ecx),%eax
+ shl $6,%eax # sizeof(irq_cpustat) == 64
+ testl $~0,SYMBOL_NAME(irq_stat)(%eax,1)
+ jnz multicall_preempt
multicall_fault1:
pushl 20(%ebx) # args[4]
multicall_fault2:
xorl %eax,%eax
jmp ret_from_hypercall
+multicall_preempt:
+ # Softirq work is pending on this CPU: record a continuation for the
+ # calls not yet executed, then bail out so softirqs can be serviced.
+ # NB. remaining nr_calls is already at top of stack
+ pushl %ebx # call_list
+ pushl $2 # nr_args == 2
+ pushl $__HYPERVISOR_multicall # op == __HYPERVISOR_multicall
+ call hypercall_create_continuation
+ addl $16,%esp # discard op, nr_args, call_list and nr_calls (4 dwords)
+ popl %ebx # restore caller-saved %ebx
+ # Return the op number so the guest re-issues the multicall on resume.
+ movl $__HYPERVISOR_multicall,%eax
+ jmp ret_from_hypercall
+
bad_multicall_address:
popl %ebx
movl $-EFAULT,%eax
for ( i = 0; i < nr_extents; i++ )
{
+ hypercall_may_preempt(
+ __HYPERVISOR_dom_mem_op, 5,
+ MEMOP_increase_reservation,
+ &extent_list[i], nr_extents-i, extent_order,
+ (d == current) ? DOMID_SELF : d->id);
+
if ( unlikely((page = alloc_domheap_pages(d, extent_order)) == NULL) )
{
DPRINTK("Could not allocate a frame\n");
for ( i = 0; i < nr_extents; i++ )
{
+ hypercall_may_preempt(
+ __HYPERVISOR_dom_mem_op, 5,
+ MEMOP_decrease_reservation,
+ &extent_list[i], nr_extents-i, extent_order,
+ (d == current) ? DOMID_SELF : d->id);
+
if ( unlikely(__get_user(mpfn, &extent_list[i]) != 0) )
return i;
{
long rc;
+ /* XXX stubbed out XXX */
+ return -ENOSYS;
+
if ( count > 512 )
return -EINVAL;
*/
#include <xen/config.h>
+#include <asm/hardirq.h>
/*
* Simple wrappers reducing source bloat. Define all irq_stat fields
* definitions instead of differing sets for each arch.
*/
-extern irq_cpustat_t irq_stat[]; /* defined in asm/hardirq.h */
+extern irq_cpustat_t irq_stat[];
#ifdef CONFIG_SMP
#define __IRQ_STAT(cpu, member) (irq_stat[cpu].member)
#include <asm/current.h>
#include <xen/spinlock.h>
#include <xen/grant_table.h>
+#include <xen/irq_cpustat.h>
extern unsigned long volatile jiffies;
extern rwlock_t domlist_lock;
void continue_nonidle_task(void);
+/* Save guest register state so an interrupted hypercall restarts cleanly. */
+void hypercall_create_continuation(unsigned int op, unsigned int nr_args, ...);
+/*
+ * Preemption point for long-running hypercalls: if this CPU has softirq
+ * work pending, create a restart continuation for hypercall _op with the
+ * given arguments and bail out.
+ *
+ * NB. the macro expands to a 'return _op;' — it may only appear inside a
+ * hypercall implementation whose return value is delivered to the guest
+ * (the returned op causes the guest to re-execute 'int 0x82').
+ * NB. _op is evaluated twice; call sites must pass a plain constant.
+ */
+#define hypercall_may_preempt(_op, _nr_args, _args...) \
+ do { \
+ if ( unlikely(softirq_pending(smp_processor_id())) ) { \
+ hypercall_create_continuation(_op , _nr_args , ##_args); \
+ return _op; \
+ } } while ( 0 )
+
/* This domain_hash and domain_list are protected by the domlist_lock. */
#define DOMAIN_HASH_SIZE 256
#define DOMAIN_HASH(_id) ((int)(_id)&(DOMAIN_HASH_SIZE-1))